bitkeeper revision 1.1269 (42524412_qy8UFAnO7-nfNL1poTqIg)
author: mafetter@fleming.research <mafetter@fleming.research>
Tue, 5 Apr 2005 07:53:54 +0000 (07:53 +0000)
committer: mafetter@fleming.research <mafetter@fleming.research>
Tue, 5 Apr 2005 07:53:54 +0000 (07:53 +0000)
Hand merge

Signed-off-by: michael.fetterman@cl.cam.ac.uk
1  2 
.rootkeys
tools/libxc/xc.h
xen/arch/x86/domain.c
xen/arch/x86/mm.c
xen/arch/x86/traps.c
xen/common/grant_table.c
xen/common/page_alloc.c
xen/include/asm-x86/domain.h
xen/include/asm-x86/mm.h
xen/include/xen/domain.h

diff --cc .rootkeys
Simple merge
Simple merge
index 1eef780c98d951d1e3215089b7f3d46e4f63e3b8,8fe3d4b0e77a3dc9e6a1e2e07c809dd39316d225..322181ffdb1cb7b9ae5cd0f1792301d3b6d02526
@@@ -277,8 -265,9 +267,10 @@@ void arch_do_createdomain(struct exec_d
              mk_l3_pgentry(__pa(d->arch.mm_perdomain_l2) | __PAGE_HYPERVISOR);
  #endif
  
+         (void)ptwr_init(d);
          shadow_lock_init(d);        
 +        INIT_LIST_HEAD(&d->arch.free_shadow_frames);
      }
  }
  
index 9ee905cea919b533430bee4d096566ecf76e1cfd,2dd5de5418d96714ed80c863d02587dcbf178e5b..54fde45fa221a04d5b6ede6a6887459ed4c818d5
@@@ -2427,20 -2304,15 +2431,18 @@@ int ptwr_debug = 0x0
  #endif
  
  /* Flush the given writable p.t. page and write-protect it again. */
- void ptwr_flush(const int which)
+ void ptwr_flush(struct domain *d, const int which)
  {
 -    unsigned long  sstat, spte, pte, *ptep, l1va;
 -    l1_pgentry_t  *sl1e = NULL, *pl1e, ol1e, nl1e;
 +    unsigned long  pte, *ptep, l1va;
 +    l1_pgentry_t  *pl1e, ol1e, nl1e;
      l2_pgentry_t  *pl2e;
-     int            i, cpu = smp_processor_id();
-     struct exec_domain *ed = current;
-     struct domain *d = ed->domain;
+     int            i;
      unsigned int   modified = 0;
  
-     l1va = ptwr_info[cpu].ptinfo[which].l1va;
 +    // not supported in combination with various shadow modes!
 +    ASSERT( !shadow_mode_enabled(d) );
 +    
+     l1va = d->arch.ptwr[which].l1va;
      ptep = (unsigned long *)&linear_pg_table[l1_linear_offset(l1va)];
  
      /*
          put_page_from_l1e(ol1e, d);
      }
      unmap_domain_mem(pl1e);
 -
 +    
      perfc_incr_histo(wpt_updates, modified, PT_UPDATES);
-     ptwr_info[cpu].ptinfo[which].prev_exec_domain = ed;
-     ptwr_info[cpu].ptinfo[which].prev_nr_updates  = modified;
+     d->arch.ptwr[which].prev_nr_updates  = modified;
  
      /*
       * STEP 3. Reattach the L1 p.t. page into the current address space.
       */
  
 -    if ( (which == PTWR_PT_ACTIVE) && likely(!shadow_mode_enabled(d)) )
 +    if ( which == PTWR_PT_ACTIVE )
      {
-         pl2e = &__linear_l2_table[ptwr_info[cpu].ptinfo[which].l2_idx];
 -        pl2e = &linear_l2_table[d->arch.ptwr[which].l2_idx];
++        pl2e = &__linear_l2_table[d->arch.ptwr[which].l2_idx];
          *pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT); 
      }
  
       * STEP 4. Final tidy-up.
       */
  
-     ptwr_info[cpu].ptinfo[which].l1va = 0;
+     d->arch.ptwr[which].l1va = 0;
 -
 -    if ( unlikely(sl1e != NULL) )
 -    {
 -        unmap_domain_mem(sl1e);
 -        put_shadow_status(d);
 -    }
  }
  
  static int ptwr_emulated_update(
@@@ -2674,17 -2571,16 +2675,16 @@@ static struct x86_mem_emulator ptwr_mem
  };
  
  /* Write page fault handler: check if guest is trying to modify a PTE. */
- int ptwr_do_page_fault(unsigned long addr)
+ int ptwr_do_page_fault(struct domain *d, unsigned long addr)
  {
-     unsigned long       pte, pfn, l2e;
-     struct pfn_info    *page;
-     l2_pgentry_t       *pl2e;
-     int                 which, cpu = smp_processor_id();
-     u32                 l2_idx;
-     struct exec_domain *ed = current;
+     unsigned long    pte, pfn, l2e;
+     struct pfn_info *page;
+     l2_pgentry_t    *pl2e;
+     int              which;
+     u32              l2_idx;
  
 -    /* Can't use linear_l2_table with external tables. */
 -    BUG_ON(shadow_mode_external(d));
 +    if ( unlikely(shadow_mode_enabled(ed->domain)) )
 +        return 0;
  
      /*
       * Attempt to read the PTE that maps the VA being accessed. By checking for
       * If last batch made no updates then we are probably stuck. Emulate this 
       * update to ensure we make progress.
       */
-     if ( (ptwr_info[cpu].ptinfo[which].prev_exec_domain == ed) &&
-          (ptwr_info[cpu].ptinfo[which].prev_nr_updates  == 0) )
-     {
-         /* Force non-emul next time, or we can get stuck emulating forever. */
-         ptwr_info[cpu].ptinfo[which].prev_exec_domain = NULL;
+     if ( d->arch.ptwr[which].prev_nr_updates == 0 )
          goto emulate;
-     }
  
-     ptwr_info[cpu].ptinfo[which].l1va   = addr | 1;
-     ptwr_info[cpu].ptinfo[which].l2_idx = l2_idx;
+     d->arch.ptwr[which].l1va   = addr | 1;
+     d->arch.ptwr[which].l2_idx = l2_idx;
      
      /* For safety, disconnect the L1 p.t. page from current space. */
 -    if ( (which == PTWR_PT_ACTIVE) && 
 -         likely(!shadow_mode_enabled(d)) )
 +    if ( which == PTWR_PT_ACTIVE )
      {
          *pl2e = mk_l2_pgentry(l2e & ~_PAGE_PRESENT);
          local_flush_tlb(); /* XXX Multi-CPU guests? */
      if ( unlikely(__put_user(pte, (unsigned long *)
                               &linear_pg_table[addr>>PAGE_SHIFT])) )
      {
 -        MEM_LOG("ptwr: Could not update pte at %p\n", (unsigned long *)
 +        MEM_LOG("ptwr: Could not update pte at %p", (unsigned long *)
                  &linear_pg_table[addr>>PAGE_SHIFT]);
          /* Toss the writable pagetable state and crash. */
-         unmap_domain_mem(ptwr_info[cpu].ptinfo[which].pl1e);
-         ptwr_info[cpu].ptinfo[which].l1va = 0;
+         unmap_domain_mem(d->arch.ptwr[which].pl1e);
+         d->arch.ptwr[which].l1va = 0;
          domain_crash();
          return 0;
      }
Simple merge
index 351568cc19ac83cce4595c459ab24559a0237b69,c78720022fc636ff0d1d2d72d9db2e865c7695d5..14648c3affac01c3807bac1798e7e2c0a581c51b
@@@ -194,12 -159,12 +159,12 @@@ __gnttab_activate_grant_ref
  
          /* rmb(); */ /* not on x86 */
  
-         frame = __gpfn_to_mfn_foreign(rd, sha->frame);
 -        frame = __translate_gpfn_to_mfn(granting_d, sha->frame);
++        frame = __gpfn_to_mfn_foreign(granting_d, sha->frame);
  
          if ( unlikely(!pfn_is_ram(frame)) ||
-              unlikely(!((flags & GNTMAP_readonly) ?
-                         get_page(&frame_table[frame], rd) :
-                         get_page_and_type(&frame_table[frame], rd,
+              unlikely(!((dev_hst_ro_flags & GNTMAP_readonly) ?
+                         get_page(&frame_table[frame], granting_d) :
+                         get_page_and_type(&frame_table[frame], granting_d,
                                            PGT_writable_page))) )
          {
              clear_bit(_GTF_writing, &sha->flags);
Simple merge
Simple merge
index 5d0bb6c0308b6980c8fd5d38940de6537e08393b,4a273526d31c4b62765903b886b3e19b8c3af295..7ac0b37ea15445bc02c9d5f4faae7c3daa9ee84d
@@@ -317,50 -266,33 +310,45 @@@ struct ptwr_info 
  #define PTWR_CLEANUP_ACTIVE 1
  #define PTWR_CLEANUP_INACTIVE 2
  
- void ptwr_flush(const int);
- int ptwr_do_page_fault(unsigned long);
- int new_guest_cr3(unsigned long pfn);
- void propagate_page_fault(unsigned long addr, u16 error_code);
+ int  ptwr_init(struct domain *);
+ void ptwr_destroy(struct domain *);
+ void ptwr_flush(struct domain *, const int);
+ int  ptwr_do_page_fault(struct domain *, unsigned long);
  
- #define __cleanup_writable_pagetable(_what)                                 \
- do {                                                                        \
-     int cpu = smp_processor_id();                                           \
-     if ((_what) & PTWR_CLEANUP_ACTIVE)                                      \
-         if (ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va)                     \
-             ptwr_flush(PTWR_PT_ACTIVE);                                     \
-     if ((_what) & PTWR_CLEANUP_INACTIVE)                                    \
-         if (ptwr_info[cpu].ptinfo[PTWR_PT_INACTIVE].l1va)                   \
-             ptwr_flush(PTWR_PT_INACTIVE);                                   \
- } while ( 0 )
- #define cleanup_writable_pagetable(_d)                                    \
-     do {                                                                  \
-         if ( unlikely(VM_ASSIST((_d), VMASST_TYPE_writable_pagetables)) ) \
-         __cleanup_writable_pagetable(PTWR_CLEANUP_ACTIVE |                \
-                                      PTWR_CLEANUP_INACTIVE);              \
+ #define cleanup_writable_pagetable(_d)                                      \
+     do {                                                                    \
+         if ( unlikely(VM_ASSIST((_d), VMASST_TYPE_writable_pagetables)) ) { \
+             if ( (_d)->arch.ptwr[PTWR_PT_ACTIVE].l1va )                     \
+                 ptwr_flush((_d), PTWR_PT_ACTIVE);                           \
+             if ( (_d)->arch.ptwr[PTWR_PT_INACTIVE].l1va )                   \
+                 ptwr_flush((_d), PTWR_PT_INACTIVE);                         \
+         }                                                                   \
      } while ( 0 )
  
 +int audit_adjust_pgtables(struct domain *d, int dir, int noisy);
 +
  #ifndef NDEBUG
 -void audit_domain(struct domain *d);
 +
 +#define AUDIT_ALREADY_LOCKED ( 1u << 0 )
 +#define AUDIT_ERRORS_OK      ( 1u << 1 )
 +#define AUDIT_QUIET          ( 1u << 2 )
 +
 +void _audit_domain(struct domain *d, int flags);
 +#define audit_domain(_d) _audit_domain((_d), 0)
  void audit_domains(void);
 +
  #else
 -#define audit_domain(_d) ((void)0)
 -#define audit_domains()  ((void)0)
 +
 +#define _audit_domain(_d, _f) ((void)0)
 +#define audit_domain(_d)      ((void)0)
 +#define audit_domains()       ((void)0)
 +
  #endif
  
+ int new_guest_cr3(unsigned long pfn);
+ void propagate_page_fault(unsigned long addr, u16 error_code);
  /*
   * Caller must own d's BIGLOCK, is responsible for flushing the TLB, and must 
   * hold a reference to the page.
Simple merge